summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorH Lohaus <hlohaus@users.noreply.github.com>2024-02-09 08:51:49 +0100
committerGitHub <noreply@github.com>2024-02-09 08:51:49 +0100
commit1d6709dafc27f7ce64df235b20ad47e2d3486ab0 (patch)
tree59e40d1130f97d219aacc9de37f4d3f6f2bc3274
parentMerge pull request #1564 from hlohaus/gemini (diff)
parentFix Phind Provider / add generate_challenge (diff)
downloadgpt4free-0.2.1.1.tar
gpt4free-0.2.1.1.tar.gz
gpt4free-0.2.1.1.tar.bz2
gpt4free-0.2.1.1.tar.lz
gpt4free-0.2.1.1.tar.xz
gpt4free-0.2.1.1.tar.zst
gpt4free-0.2.1.1.zip
-rw-r--r--README.md15
-rw-r--r--g4f/Provider/Phind.py50
-rw-r--r--g4f/Provider/needs_auth/OpenaiChat.py25
-rw-r--r--g4f/models.py2
-rw-r--r--setup.py2
5 files changed, 83 insertions, 11 deletions
diff --git a/README.md b/README.md
index 91826b9a..797ce304 100644
--- a/README.md
+++ b/README.md
@@ -358,6 +358,21 @@ response = g4f.ChatCompletion.create(
# Displaying the response
print(response)
+
+from g4f.image import ImageResponse
+
+# Get image links from response
+for chunk in g4f.ChatCompletion.create(
+ model=g4f.models.default, # Using the default model
+ provider=g4f.Provider.OpenaiChat, # Specifying the provider as OpenaiChat
+ messages=[{"role": "user", "content": "Create images with dogs"}],
+    access_token="...", # Need an access token from a plus user
+ stream=True,
+ ignore_stream=True
+):
+ if isinstance(chunk, ImageResponse):
+ print(chunk.images) # Print generated image links
+ print(chunk.alt) # Print used prompt for image generation
```
##### Using Browser
diff --git a/g4f/Provider/Phind.py b/g4f/Provider/Phind.py
index e7156842..a7fdbeca 100644
--- a/g4f/Provider/Phind.py
+++ b/g4f/Provider/Phind.py
@@ -1,5 +1,6 @@
from __future__ import annotations
+from urllib import parse
from datetime import datetime
from ..typing import AsyncResult, Messages
@@ -55,9 +56,9 @@ class Phind(AsyncGeneratorProvider):
"customLinks": []
},
"context": "\n".join([message["content"] for message in messages if message["role"] == "system"]),
- "rewrittenQuestion": prompt,
- "challenge": 0.21132115912208504
}
+ data["challenge"] = generate_challenge(data)
+
async with session.post(f"https://https.api.phind.com/infer/", headers=headers, json=data) as response:
new_line = False
async for line in response.iter_lines():
@@ -65,6 +66,8 @@ class Phind(AsyncGeneratorProvider):
chunk = line[6:]
if chunk.startswith(b'<PHIND_DONE/>'):
break
+ if chunk.startswith(b'<PHIND_BACKEND_ERROR>'):
+ raise RuntimeError(f"Response: {chunk}")
if chunk.startswith(b'<PHIND_WEBRESULTS>') or chunk.startswith(b'<PHIND_FOLLOWUP>'):
pass
elif chunk.startswith(b"<PHIND_METADATA>") or chunk.startswith(b"<PHIND_INDICATOR>"):
@@ -78,3 +81,46 @@ class Phind(AsyncGeneratorProvider):
new_line = False
else:
new_line = True
+
+def deterministic_stringify(obj):
+ def handle_value(value):
+ if isinstance(value, (dict, list)):
+ if isinstance(value, list):
+ return '[' + ','.join(sorted(map(handle_value, value))) + ']'
+ else: # It's a dict
+ return '{' + deterministic_stringify(value) + '}'
+ elif isinstance(value, bool):
+ return 'true' if value else 'false'
+ elif isinstance(value, (int, float)):
+ return format(value, '.8f').rstrip('0').rstrip('.')
+ elif isinstance(value, str):
+ return f'"{value}"'
+ else:
+ return 'null'
+
+ items = sorted(obj.items(), key=lambda x: x[0])
+ return ','.join([f'{k}:{handle_value(v)}' for k, v in items if handle_value(v) is not None])
+
+def simple_hash(s):
+ d = 0
+ for char in s:
+ if len(char) > 1 or ord(char) >= 256:
+ continue
+ d = ((d << 5) - d + ord(char[0])) & 0xFFFFFFFF
+ if d > 0x7FFFFFFF: # 2147483647
+ d -= 0x100000000 # Subtract 2**32
+ return d
+
+def generate_challenge(obj):
+ deterministic_str = deterministic_stringify(obj)
+ encoded_str = parse.quote(deterministic_str, safe='')
+
+ c = simple_hash(encoded_str)
+ a = (9301 * c + 49297)
+ b = 233280
+
+    # If negative, we need special logic
+ if a < 0:
+ return ((a%b)-b)/b
+ else:
+ return a%b/b \ No newline at end of file
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
index 32aee9fb..c122da46 100644
--- a/g4f/Provider/needs_auth/OpenaiChat.py
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -342,26 +342,30 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
raise MissingAuthError(f'Missing "access_token"')
cls._cookies = cookies
- headers = {"Authorization": f"Bearer {access_token}"}
+ auth_headers = {"Authorization": f"Bearer {access_token}"}
async with StreamSession(
proxies={"https": proxy},
impersonate="chrome110",
timeout=timeout,
- cookies=dict([(name, value) for name, value in cookies.items() if name == "_puid"])
+ headers={"Cookie": "; ".join(f"{k}={v}" for k, v in cookies.items())}
) as session:
try:
image_response = None
if image:
- image_response = await cls.upload_image(session, headers, image, kwargs.get("image_name"))
+ image_response = await cls.upload_image(session, auth_headers, image, kwargs.get("image_name"))
except Exception as e:
yield e
end_turn = EndTurn()
- model = cls.get_model(model or await cls.get_default_model(session, headers))
+ model = cls.get_model(model or await cls.get_default_model(session, auth_headers))
model = "text-davinci-002-render-sha" if model == "gpt-3.5-turbo" else model
while not end_turn.is_end:
+ arkose_token = await cls.get_arkose_token(session)
data = {
"action": action,
- "arkose_token": await cls.get_arkose_token(session),
+ "arkose_token": arkose_token,
+ "conversation_mode": {"kind": "primary_assistant"},
+ "force_paragen": False,
+ "force_rate_limit": False,
"conversation_id": conversation_id,
"parent_message_id": parent_id,
"model": model,
@@ -373,7 +377,11 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
async with session.post(
f"{cls.url}/backend-api/conversation",
json=data,
- headers={"Accept": "text/event-stream", **headers}
+ headers={
+ "Accept": "text/event-stream",
+ "OpenAI-Sentinel-Arkose-Token": arkose_token,
+ **auth_headers
+ }
) as response:
if not response.ok:
raise RuntimeError(f"Response {response.status_code}: {await response.text()}")
@@ -439,7 +447,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
Returns:
tuple[str, dict]: A tuple containing the access token and cookies.
"""
- with get_browser(proxy=proxy) as driver:
+ driver = get_browser(proxy=proxy)
+ try:
driver.get(f"{cls.url}/")
WebDriverWait(driver, timeout).until(EC.presence_of_element_located((By.ID, "prompt-textarea")))
access_token = driver.execute_script(
@@ -451,6 +460,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
"return accessToken;"
)
return access_token, get_driver_cookies(driver)
+ finally:
+ driver.close()
@classmethod
async def get_arkose_token(cls, session: StreamSession) -> str:
diff --git a/g4f/models.py b/g4f/models.py
index dd8e175d..ed86024e 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -89,7 +89,7 @@ gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = RetryProvider([
- Bing, Phind, Liaobots,
+ Bing, Liaobots,
])
)
diff --git a/setup.py b/setup.py
index b8383a7a..4f3b0359 100644
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@ INSTALL_REQUIRE = [
EXTRA_REQUIRE = {
'all': [
- "curl_cffi>=0.5.10",
+ "curl_cffi>=0.6.0b9",
"certifi",
"async-property", # openai
"py-arkose-generator", # openai